static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
{
- on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+ on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
}
/*
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
{
- on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+ on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
}
#endif /* !XEN */
int ret;
DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
- ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+ ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
}
/* save the current system wide pmu states */
- ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+ ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
goto cleanup_reserve;
pfm_alt_intr_handler = NULL;
- ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+ ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
if (ret) {
DPRINT(("on_each_cpu() failed: %d\n", ret));
}
BUG_ON(in_irq());
spin_lock(&xenpfm_context_lock);
- smp_call_function(&xenpfm_context_load_cpu, &arg, 1, 1);
+ smp_call_function(&xenpfm_context_load_cpu, &arg, 1);
xenpfm_context_load_cpu(&arg);
spin_unlock(&xenpfm_context_lock);
for_each_online_cpu(cpu) {
return error;
}
- smp_call_function(&xenpfm_context_unload_cpu, &arg, 1, 1);
+ smp_call_function(&xenpfm_context_unload_cpu, &arg, 1);
xenpfm_context_unload_cpu(&arg);
spin_unlock(&xenpfm_context_lock);
for_each_online_cpu(cpu) {
void
smp_flush_tlb_all (void)
{
- on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+ on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
}
void
* anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
* rather trivial.
*/
- on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+ on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
}
#endif
*/
int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
+smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
int wait)
{
struct call_data_struct data;
* [SUMMARY] Run a function on all other CPUs.
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
* <wait> If true, wait (atomically) until function has completed on other CPUs.
* [RETURNS] 0 on success, else a negative status code.
*
* hardware interrupt handler or from a bottom half handler.
*/
int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+smp_call_function (void (*func) (void *info), void *info, int wait)
{
struct call_data_struct data;
int cpus = num_online_cpus()-1;
#ifdef XEN
int
on_selected_cpus(const cpumask_t *selected, void (*func) (void *info),
- void *info, int retry, int wait)
+ void *info, int wait)
{
struct call_data_struct data;
unsigned int cpu, nr_cpus = cpus_weight(*selected);
go[MASTER] = 1;
- if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+ if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
return;
}
flush_data.end = end;
flush_data.nbits = nbits;
on_selected_cpus(&selected_cpus, sn_flush_ptcga_cpu,
- &flush_data, 1, 1);
+ &flush_data, 1);
}
spin_unlock(&sn2_ptcg_lock2);
}
if (cpu != current->processor) {
spin_barrier(&per_cpu(schedule_data, cpu).schedule_lock);
/* Flush VHPT on remote processors. */
- smp_call_function_single(cpu, &ptc_ga_remote_func,
- &args, 0, 1);
+ smp_call_function_single(cpu, &ptc_ga_remote_func, &args, 1);
} else {
ptc_ga_remote_func(&args);
}
if (v->processor == smp_processor_id())
__thash_purge_all(v);
else
- smp_call_function_single(v->processor, __thash_purge_all, v, 1, 1);
+ smp_call_function_single(v->processor, __thash_purge_all, v, 1);
vcpu_unpause(v);
}
if (cpu == smp_processor_id())
processor_get_freq((void*)&freq);
else
- smp_call_function_single(cpu, processor_get_freq,
- (void *)&freq, 0, 1);
+ smp_call_function_single(cpu, processor_get_freq, &freq, 1);
return freq;
}
if (cpu == smp_processor_id())
processor_set_pstate((void *)&value);
else
- smp_call_function_single(cpu, processor_set_pstate,
- (void *)&value, 0, 1);
+ smp_call_function_single(cpu, processor_set_pstate, &value, 1);
if (value) {
printk(KERN_WARNING "Transition failed\n");
new_tlbflush_clock_period(void)
{
/* flush all vhpt of physical cpu and mTLB */
- on_each_cpu(tlbflush_clock_local_flush, NULL, 1, 1);
+ on_each_cpu(tlbflush_clock_local_flush, NULL, 1);
/*
* if global TLB shootdown is finished, increment tlbflush_time
IA64_SAL_DEBUG("SAL_GET_STATE_INFO: remote\n");
ret = smp_call_function_single(e->cpuid,
get_state_info_on,
- &arg, 0, 1);
+ &arg, 1);
if (ret < 0) {
printk("SAL_GET_STATE_INFO "
"smp_call_function_single error:"
int ret;
IA64_SAL_DEBUG("SAL_CLEAR_STATE_INFO: remote\n");
ret = smp_call_function_single(e->cpuid,
- clear_state_info_on, &arg, 0, 1);
+ clear_state_info_on, &arg, 1);
if (ret < 0) {
printk("sal_emulator: "
"SAL_CLEAR_STATE_INFO "
.progress = 0,
.status = 0
};
- smp_call_function(remote_pal_cache_flush,
- (void *)&args, 1, 1);
+ smp_call_function(remote_pal_cache_flush, &args, 1);
if (args.status != 0)
panic_domain(NULL, "PAL_CACHE_FLUSH ERROR, "
"remote status %lx", args.status);
/* must be performed on all remote processors
in the coherence domain. */
smp_call_function(remote_pal_prefetch_visibility,
- (void *)in1, 1, 1);
+ (void *)in1, 1);
status = 1; /* no more necessary on remote processor */
}
break;
status = ia64_pal_mc_drain();
/* FIXME: All vcpus likely call PAL_MC_DRAIN.
That causes the congestion. */
- smp_call_function(remote_pal_mc_drain, NULL, 1, 1);
+ smp_call_function(remote_pal_mc_drain, NULL, 1);
break;
case PAL_BRAND_INFO:
if (in1 == 0) {
// takes care of mTLB flush.
smp_call_function_single(v->processor,
__vcpu_flush_vtlb_all,
- v, 1, 1);
+ v, 1);
}
perfc_incr(domain_flush_vtlb_all);
}
{
/* Very heavy... */
if (HAS_PERVCPU_VHPT(d) || is_hvm_domain(d))
- on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+ on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
else
- on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
+ on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
cpus_clear (d->domain_dirty_cpumask);
}
thash_purge_all(v);
}
smp_call_function((void (*)(void *))local_flush_tlb_all,
- NULL, 1, 1);
+ NULL, 1);
} else if (HAS_PERVCPU_VHPT(d)) {
for_each_vcpu (d, v) {
if (!v->is_initialised)
vcpu_purge_tr_entry(&PSCBX(v,itlb));
vcpu_vhpt_flush(v);
}
- on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+ on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
} else {
- on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
+ on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1);
}
cpus_clear (d->domain_dirty_cpumask);
}
for_each_cpu_mask (cpu, *mask)
if (cpu != smp_processor_id())
smp_call_function_single
- (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
+ (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1);
}
#ifdef PERF_COUNTERS
if (likely(cpu_isset(smp_processor_id(), cmd->mask)))
do_drv_read((void *)cmd);
else
- on_selected_cpus(&cmd->mask, do_drv_read, (void *)cmd, 0, 1);
+ on_selected_cpus(&cmd->mask, do_drv_read, cmd, 1);
}
static void drv_write(struct drv_cmd *cmd)
cpu_isset(smp_processor_id(), cmd->mask))
do_drv_write((void *)cmd);
else
- on_selected_cpus(&cmd->mask, do_drv_write, (void *)cmd, 0, 0);
+ on_selected_cpus(&cmd->mask, do_drv_write, cmd, 0);
}
static u32 get_cur_val(cpumask_t mask)
read_measured_perf_ctrs((void *)&readin);
} else {
on_selected_cpus(cpumask_of(cpu), read_measured_perf_ctrs,
- (void *)&readin, 0, 1);
+ &readin, 1);
}
cur.aperf.whole = readin.aperf.whole - saved->aperf.whole;
cmd.val = next_perf_state;
- on_selected_cpus(&cmd.mask, transition_pstate, (void *) &cmd, 0, 0);
+ on_selected_cpus(&cmd.mask, transition_pstate, &cmd, 0);
perf->state = next_perf_state;
policy->cur = freqs.new;
{
/* C1E is sometimes enabled during entry to ACPI mode. */
if ((port == acpi_smi_cmd) && (value == acpi_enable_value))
- on_each_cpu(disable_c1e, NULL, 1, 1);
+ on_each_cpu(disable_c1e, NULL, 1);
}
static void __devinit init_amd(struct cpuinfo_x86 *c)
*/
static void mce_amd_work_fn(void *data)
{
- on_each_cpu(mce_amd_checkregs, data, 1, 1);
+ on_each_cpu(mce_amd_checkregs, data, 1);
if (adjust > 0) {
if (!guest_enabled_event(dom0->vcpu[0], VIRQ_MCA) ) {
if (log_cpus == NULL)
return x86_mcerr("do_mca cpuinfo", -ENOMEM);
- if (on_each_cpu(do_mc_get_cpu_info, log_cpus,
- 1, 1) != 0) {
+ if (on_each_cpu(do_mc_get_cpu_info, log_cpus, 1)) {
xfree(log_cpus);
return x86_mcerr("do_mca cpuinfo", -EIO);
}
add_taint(TAINT_ERROR_INJECT);
on_selected_cpus(cpumask_of(target), x86_mc_msrinject,
- mc_msrinject, 1, 1);
+ mc_msrinject, 1);
break;
add_taint(TAINT_ERROR_INJECT);
on_selected_cpus(cpumask_of(target), x86_mc_mceinject,
- mc_mceinject, 1, 1);
+ mc_mceinject, 1);
break;
default:
void cpu_mcheck_distribute_cmci(void)
{
if (cmci_support && !mce_disabled)
- on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0, 0);
+ on_each_cpu(__cpu_mcheck_distribute_cmci, NULL, 0);
}
static void clear_cmci(void)
static void mce_work_fn(void *data)
{
- on_each_cpu(mce_checkregs, NULL, 1, 1);
+ on_each_cpu(mce_checkregs, NULL, 1);
if (variable_period) {
if (adjust)
atomic_set(&data.gate,0);
/* Start the ball rolling on other CPUs */
- if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
+ if (smp_call_function(ipi_handler, &data, 0) != 0)
panic("mtrr: timed out waiting for other CPUs\n");
local_irq_save(flags);
if (cpu == 0)
mtrr_save_fixed_ranges(NULL);
else
- on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1, 1);
+ on_selected_cpus(cpumask_of(0), mtrr_save_fixed_ranges, NULL, 1);
put_cpu();
}
if ( !v->domain->arch.hvm_domain.is_in_uc_mode )
{
/* Flush physical caches. */
- on_each_cpu(local_flush_cache, NULL, 1, 1);
+ on_each_cpu(local_flush_cache, NULL, 1);
hvm_set_uc_mode(v, 1);
}
spin_unlock(&v->domain->arch.hvm_domain.uc_lock);
static void svm_wbinvd_intercept(void)
{
if ( has_arch_pdevs(current->domain) )
- on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+ on_each_cpu(wbinvd_ipi, NULL, 1);
}
static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs)
int cpu = v->arch.hvm_vmx.active_cpu;
if ( cpu != -1 )
- on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1, 1);
+ on_selected_cpus(cpumask_of(cpu), __vmx_clear_vmcs, v, 1);
}
static void vmx_load_vmcs(struct vcpu *v)
{
int cpu = v->arch.hvm_vmx.active_cpu;
if ( cpu != -1 )
- on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1, 1);
+ on_selected_cpus(cpumask_of(cpu), wbinvd_ipi, NULL, 1);
}
vmx_clear_vmcs(v);
if ( d->arch.hvm_domain.hap_enabled && d->vcpu[0] )
{
ASSERT(local_irq_is_enabled());
- on_each_cpu(__ept_sync_domain, d, 1, 1);
+ on_each_cpu(__ept_sync_domain, d, 1);
}
}
return;
if ( cpu_has_wbinvd_exiting )
- on_each_cpu(wbinvd_ipi, NULL, 1, 1);
+ on_each_cpu(wbinvd_ipi, NULL, 1);
else
wbinvd();
}
}
if ( !cpus_empty(cpu_eoi_map) )
- on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+ on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
}
int pirq_guest_eoi(struct domain *d, int irq)
{
cpu_eoi_map = action->cpu_eoi_map;
spin_unlock_irq(&desc->lock);
- on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 0);
+ on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 0);
spin_lock_irq(&desc->lock);
}
break;
{
BUG_ON(action->ack_type != ACKTYPE_EOI);
spin_unlock_irq(&desc->lock);
- on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1, 1);
+ on_selected_cpus(&cpu_eoi_map, set_eoi_ready, desc, 1);
spin_lock_irq(&desc->lock);
}
if ( reboot_cpu_id != smp_processor_id() )
{
on_selected_cpus(cpumask_of(reboot_cpu_id), __machine_reboot_kexec,
- image, 1, 0);
+ image, 0);
for (;;)
; /* nothing */
}
int nmi_setup_events(void)
{
- on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
+ on_each_cpu(nmi_cpu_setup, NULL, 1);
return 0;
}
/* We need to serialize save and setup for HT because the subset
* of msrs are distinct for save and setup operations
*/
- on_each_cpu(nmi_save_registers, NULL, 0, 1);
+ on_each_cpu(nmi_save_registers, NULL, 1);
return 0;
}
void nmi_release_counters(void)
{
- on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
+ on_each_cpu(nmi_cpu_shutdown, NULL, 1);
release_lapic_nmi();
free_msrs();
}
int nmi_start(void)
{
- on_each_cpu(nmi_cpu_start, NULL, 0, 1);
+ on_each_cpu(nmi_cpu_start, NULL, 1);
return 0;
}
void nmi_stop(void)
{
- on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
+ on_each_cpu(nmi_cpu_stop, NULL, 1);
}
watchdog_disable();
console_start_sync();
local_irq_enable();
- smp_call_function(__machine_halt, NULL, 1, 0);
+ smp_call_function(__machine_halt, NULL, 0);
__machine_halt(NULL);
}
{
/* Send IPI to the boot CPU (logical cpu 0). */
on_selected_cpus(cpumask_of(0), __machine_restart,
- &delay_millisecs, 1, 0);
+ &delay_millisecs, 0);
for ( ; ; )
halt();
}
int smp_call_function(
void (*func) (void *info),
void *info,
- int retry,
int wait)
{
cpumask_t allbutself = cpu_online_map;
cpu_clear(smp_processor_id(), allbutself);
- return on_selected_cpus(&allbutself, func, info, retry, wait);
+ return on_selected_cpus(&allbutself, func, info, wait);
}
int on_selected_cpus(
const cpumask_t *selected,
void (*func) (void *info),
void *info,
- int retry,
int wait)
{
struct call_data_struct data;
{
int timeout = 10;
- smp_call_function(stop_this_cpu, NULL, 1, 0);
+ smp_call_function(stop_this_cpu, NULL, 0);
/* Wait 10ms for all other CPUs to go offline. */
while ( (num_online_cpus() > 1) && (timeout-- > 0) )
opt_consistent_tscs
? time_calibration_tsc_rendezvous
: time_calibration_std_rendezvous,
- &r, 0, 1);
+ &r, 1);
}
void init_percpu_time(void)
case CALLBACKTYPE_sysenter_deprecated:
if ( !cpu_has_sep )
ret = -EINVAL;
- else if ( on_each_cpu(do_update_sysenter, ®->address, 1, 1) != 0 )
+ else if ( on_each_cpu(do_update_sysenter, ®->address, 1) != 0 )
ret = -EIO;
break;
atomic_set(&gdb_smp_paused_count, 0);
- smp_call_function(gdb_pause_this_cpu, NULL, /* dont wait! */0, 0);
+ smp_call_function(gdb_pause_this_cpu, NULL, /* don't wait! */0);
/* Wait 100ms for all other CPUs to enter pause loop */
while ( (atomic_read(&gdb_smp_paused_count) < (num_online_cpus() - 1))
if ( cpu == smp_processor_id() )
continue;
printk("\n*** Dumping CPU%d host state: ***\n", cpu);
- on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1, 1);
+ on_selected_cpus(cpumask_of(cpu), __dump_execstate, NULL, 1);
}
printk("\n");
spin_lock(&lock);
- smp_call_function(read_clocks_slave, NULL, 0, 0);
+ smp_call_function(read_clocks_slave, NULL, 0);
local_irq_disable();
read_clocks_cpumask = cpu_online_map;
extern void __init init_smp_config (void);
extern void smp_do_timer (struct pt_regs *regs);
-extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
- int retry, int wait);
+extern int smp_call_function_single (int cpuid, void (*func) (void *info),
+ void *info, int wait);
extern void smp_send_reschedule (int cpu);
#ifdef XEN
extern void lock_ipi_calllock(unsigned long *flags);
extern int smp_call_function(
void (*func) (void *info),
void *info,
- int retry,
int wait);
/*
const cpumask_t *selected,
void (*func) (void *info),
void *info,
- int retry,
int wait);
/*
static inline int on_each_cpu(
void (*func) (void *info),
void *info,
- int retry,
int wait)
{
- return on_selected_cpus(&cpu_online_map, func, info, retry, wait);
+ return on_selected_cpus(&cpu_online_map, func, info, wait);
}
#define smp_processor_id() raw_smp_processor_id()